bitkeeper revision 1.1733 (42ba827d4k5gz1Dasgd0gorMu-SSmQ)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 23 Jun 2005 09:35:57 +0000 (09:35 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 23 Jun 2005 09:35:57 +0000 (09:35 +0000)
The 32-bit x86 LTP exposed bugs with LDT handling with x86_64 Xen and
XenLinux:
- fill in the code for arbitrary_virt_to_machine (XenLinux)
- set 64-bit value for the base address for LDT (Xen)
- fix a bug (64-bit cleanup) in map_ldt_shadow_page (Xen)

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c
linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
xen/arch/x86/mm.c
xen/include/asm-x86/ldt.h

index 9111fb80d024f56c34315ee761269300b1721f9c..855128a4b2e2df19e7bd8865eba853c63c8bbb38 100644 (file)
@@ -62,7 +62,6 @@ static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
        if (reload) {
 #ifdef CONFIG_SMP
                cpumask_t mask;
-
                preempt_disable();
 #endif
                make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
@@ -73,8 +72,6 @@ static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
                if (!cpus_equal(current->mm->cpu_vm_mask, mask))
                        smp_call_function(flush_ldt, NULL, 1, 1);
                preempt_enable();
-#else
-               load_LDT(pc);
 #endif
        }
        if (oldsize) {
@@ -188,13 +185,12 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
 {
        struct task_struct *me = current;
        struct mm_struct * mm = me->mm;
-       unsigned long entry = 0, *lp;
+       __u32 entry_1, entry_2, *lp;
        unsigned long mach_lp;
        int error;
        struct user_desc ldt_info;
 
        error = -EINVAL;
-
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;        
@@ -218,26 +214,26 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
                        goto out_unlock;
        }
 
-       lp = (unsigned long *)((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-       mach_lp = arbitrary_virt_to_machine(lp);
+       lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+       mach_lp = arbitrary_virt_to_machine(lp);
 
        /* Allow LDTs to be cleared by the user. */
        if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
                if (oldmode || LDT_empty(&ldt_info)) {
-                        entry = 0;
+                       entry_1 = 0;
+                       entry_2 = 0;
                        goto install;
                }
        }
 
-#if 0
-       entry = LDT_entry(&ldt_info);
-#endif
+       entry_1 = LDT_entry_a(&ldt_info);
+       entry_2 = LDT_entry_b(&ldt_info);
        if (oldmode)
-               entry &= ~(1 << 20);
+               entry_2 &= ~(1 << 20);
 
        /* Install the new entry ...  */
 install:
-       error = HYPERVISOR_update_descriptor(mach_lp, entry);
+       error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
 
 out_unlock:
        up(&mm->context.sem);
index 5b86bd7978314155761c12acabfdc6d84ea72aad..9745edc23b5380f22dbf5d1cef216da63ef66b62 100644 (file)
@@ -30,7 +30,20 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
 extern unsigned long pgkern_mask;
 
-#define arbitrary_virt_to_machine(__va) ({0;})
+#define virt_to_ptep(__va)                                             \
+({                                                                     \
+       pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
+       pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));        \
+       pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));        \
+       pte_offset_kernel(__pmd, (unsigned long)(__va));                \
+})
+
+#define arbitrary_virt_to_machine(__va)                                        \
+({                                                                     \
+       pte_t *__pte = virt_to_ptep(__va);                              \
+       unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK;     \
+       __pa | ((unsigned long)(__va) & (PAGE_SIZE-1));                 \
+})
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
@@ -210,6 +223,7 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
 #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
index d0de3d0d0eced249fd2588b39517683e08b7b8bf..52b404890975c77d2aa9c93ae73be081c1d770de 100644 (file)
@@ -288,7 +288,7 @@ int map_ldt_shadow_page(unsigned int off)
     struct domain *d = v->domain;
     unsigned long gpfn, gmfn;
     l1_pgentry_t l1e, nl1e;
-    unsigned gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
+    unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
     int res;
 
 #if defined(__x86_64__)
index 8288ffa1188e72ef3afc4ac7afa0c8fac960ba87..107e67720b8ff9191c8e2312c1cda18fe1c86eca 100644 (file)
@@ -18,9 +18,7 @@ static inline void load_LDT(struct vcpu *v)
     {
         cpu = smp_processor_id();
         desc = gdt_table + __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
-        desc->a = ((LDT_VIRT_START(v)&0xffff)<<16) | (ents*8-1);
-        desc->b = (LDT_VIRT_START(v)&(0xff<<24)) | 0x8200 |
-            ((LDT_VIRT_START(v)&0xff0000)>>16);
+        _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, 2);
         __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
     }
 }